diff --git a/assets/css/main.css b/assets/css/main.css index f92cd10..06e3ffc 100755 --- a/assets/css/main.css +++ b/assets/css/main.css @@ -598,3 +598,25 @@ img[alt=mitel] { .articles article:hover { background-color: #dbeaf0; } + +/* Article-specific styles +* --------------------------------------------*/ + +.schema-design-nosql table { + border-collapse: collapse; +} + +.schema-design-nosql table p { + margin: 0; +} + +.schema-design-nosql table th, +.schema-design-nosql table td { + padding: 5px; + border: 1px solid #abafb3; +} + +.schema-design-nosql table td:nth-child(1), +.schema-design-nosql table td:nth-child(2) { + white-space: nowrap; +} diff --git a/content/articles/2024-10-08-schema-design-part-1.md b/content/articles/2024-10-08-schema-design-part-1.md new file mode 100644 index 0000000..ede1e1d --- /dev/null +++ b/content/articles/2024-10-08-schema-design-part-1.md @@ -0,0 +1,180 @@ ++++ +title = "Schema Design - Part 1: Trigger-based SQL" +slug = "schema-design-sql" +date = "2024-10-08T00:00:00-00:00" +tags = ["schema-design", "sql"] +showpagemeta = true ++++ + +TODO + +## Deposition + +TODO + +[earlier post](https://eric-fritz.com/articles/deposition/) + +[earlier post](https://eric-fritz.com/articles/easy-peasy-sql-audit-tables/) + + +{{< lightbox src="/images/schema-design/deposition-schema.png" anchor="deposition-schema" >}} + +TODO - After insert, update, delete on builds: + +```sql +CREATE FUNCTION update_product( + target_team_id integer, target_name text +) RETURNS void LANGUAGE plpgsql AS $$ BEGIN + UPDATE products p + SET + active = ep.active, + deployed = ep.deployed, + flagged = ep.flagged, + active_flagged = ep.active_flagged, + deploy_flagged = ep.deploy_flagged + FROM expanded_products ep + WHERE + ep.team_id = target_team_id AND p.team_id = target_team_id AND + ep.name = target_name AND p.name = target_name; +END $$; + +CREATE VIEW expanded_products AS + SELECT + p.team_id, + p.name, + COALESCE(bool_or(b.active), false) AS active, + 
COALESCE(bool_or(b.deployed), false) AS deployed, + COALESCE(bool_or(b.flagged), false) AS flagged, + COALESCE(bool_or((b.active AND b.flagged)), false) AS active_flagged, + COALESCE(bool_or((b.deployed AND b.flagged)), false) AS deploy_flagged + FROM products p + LEFT JOIN builds b ON ... + GROUP BY p.team_id, p.name; +``` + +TODO / After insert on builds, insert/delete on build deployments + +```sql +CREATE FUNCTION set_active_build( + target_team_id integer, target_name text +) RETURNS void LANGUAGE plpgsql AS $$ BEGIN + UPDATE builds b + SET active = EXISTS ( + SELECT 1 FROM build_deployments d WHERE + d.build_product_name = b.product_name AND + d.build_version = b.version AND + d.build_token = b.build_token + ) OR EXISTS ( + SELECT 1 FROM active_builds ab WHERE + b.product_name = ab.product_name AND + b.version = ab.version AND + b.build_token = ab.build_token + ) + WHERE + b.product_team_id = target_team_id AND + b.product_name = target_name; +END $$; + +CREATE VIEW active_builds AS + SELECT b.* FROM products p JOIN builds b + ON ...
+ AND NOT EXISTS ( + SELECT 1 FROM builds cmp WHERE + cmp.product_team_id = p.team_id AND cmp.product_name = p.name AND + cmp.build_datetime > b.build_datetime + ) + ORDER BY b.build_datetime DESC; +``` + +TODO / After insert, delete on build_deployments + +```sql +CREATE FUNCTION update_build_deployment( + target_team_id integer, target_name text, + target_version text, target_build_token text +) RETURNS void LANGUAGE plpgsql AS $$ BEGIN + UPDATE builds b + SET deployed = bd.deployed + FROM expanded_build_deployments bd + WHERE + bd.product_team_id = target_team_id AND b.product_team_id = target_team_id AND + bd.product_name = target_name AND b.product_name = target_name AND + bd.version = target_version AND b.version = target_version AND + bd.build_token = target_build_token AND b.build_token = target_build_token; +END $$; + +CREATE VIEW expanded_build_deployments AS + SELECT + b.product_team_id, + b.product_name, + b.version, + b.build_token, + (count(bd.deployment_token) > 0) AS deployed + FROM builds b + LEFT JOIN build_deployments bd ON ... 
+ GROUP BY b.product_team_id, b.product_name, b.version, b.build_token; +``` + +TODO / After insert/update/delete on dependency_versions and dependency_version_flags + +```sql +CREATE FUNCTION update_dependencies( + target_source text, target_name text +) RETURNS void LANGUAGE plpgsql AS $$ BEGIN + UPDATE dependencies d + SET flagged = ed.flagged + FROM expanded_dependencies ed + WHERE + ed.source = target_source AND d.source = target_source AND + ed.name = target_name AND d.name = target_name; +END $$; + +CREATE VIEW expanded_dependencies AS + SELECT + d.source, + d.name, + EXISTS ( + SELECT 1 FROM dependency_version_flags dvf WHERE + d.source = dvf.dependency_version_source AND + d.name = dvf.dependency_version_name + ) AS flagged + FROM dependencies d; +``` + +TODO / After insert/update/delete on dependency_version_flags + +```sql +CREATE FUNCTION update_build( + target_team_id integer, target_name text, + target_version text, target_build_token text +) RETURNS void LANGUAGE plpgsql AS $$ DECLARE items RECORD; BEGIN + UPDATE builds b + SET flagged = eb.flagged + FROM expanded_builds eb + WHERE + eb.product_team_id = target_team_id AND b.product_team_id = target_team_id AND + eb.product_name = target_name AND b.product_name = target_name AND + eb.version = target_version AND b.version = target_version AND + eb.build_token = target_build_token AND b.build_token = target_build_token; +END $$; + +CREATE VIEW expanded_builds AS + SELECT + b.product_team_id, + b.product_name, + b.version, + b.build_token, + (count(DISTINCT dvf.dependency_version_flag_id) > 0) AS flagged + FROM + ( + builds b + LEFT JOIN build_dependency_versions bdv ON ... + LEFT JOIN dependency_versions v ON ... + LEFT JOIN dependency_version_flags dvf ON + ... 
AND + (dvf.apply_globally OR dvf.team_id = b.product_team_id) + ) + GROUP BY b.product_team_id, b.product_name, b.version, b.build_token; +``` + +TODO diff --git a/content/articles/2024-10-08-schema-design-part-2.md b/content/articles/2024-10-08-schema-design-part-2.md new file mode 100644 index 0000000..e72fb93 --- /dev/null +++ b/content/articles/2024-10-08-schema-design-part-2.md @@ -0,0 +1,15 @@ ++++ +title = "Schema Design - Part 2: SQL-Based Document Store" +slug = "schema-design-document-store" +date = "2024-10-08T00:00:00-00:00" +tags = ["schema-design", "sql"] +showpagemeta = true ++++ + +TODO + +## LSIF Server + +TODO + +{{< lightbox src="/images/schema-design/lsif-schema.png" anchor="lsif-schema" >}} diff --git a/content/articles/2024-10-08-schema-design-part-3.md b/content/articles/2024-10-08-schema-design-part-3.md new file mode 100644 index 0000000..c3e358f --- /dev/null +++ b/content/articles/2024-10-08-schema-design-part-3.md @@ -0,0 +1,262 @@ ++++ +title = "Schema Design - Part 3: NoSQL" +slug = "schema-design-nosql" +date = "2024-10-08T00:00:00-00:00" +tags = ["schema-design", "nosql"] +showpagemeta = true ++++ + +TODO + +## Manhunt + +At Mitel, I authored **Reflex**, a product that triggered and managed *incidents* from external events. An incident opened a common line of communication (chat room, conference call, shared documents, etc) for the parties involved to communicate and troubleshoot an active incident. Incidents can be defined in stages, and if a condition is met (or not met) within a certain time period, the incident escalates to the next stage. Generally, these conditions would revolve around the acknowledgement of a user tied to an incident. + +The service that dealt with contacting users and dealing with their responses was called **Manhunt** internally. Behaviorally, this service is similar to [PagerDuty](https://www.pagerduty.com/). 
The service defines users along with their preferred contact methods (automated phone call, text message, email, webhook, etc) and their relative order. A user may wish to be contacted via email and text with phone number A, and then a call to phone number B if there is no response within ten minutes. The service also manages and performs user *searches*. A search is opened with metadata used to template the contact attempts. A search ends once the user has responded to some method (via a [DTMF](https://en.wikipedia.org/wiki/Dual-tone_multi-frequency_signaling) response to a phone call, a text response, following a link in an email, etc). + +This application was written to be hosted on AWS, so we chose [DynamoDB](https://aws.amazon.com/dynamodb/). We chose to go with the NoSQL approach due to the data we needed to track being minimally relational, and in large part due to the availability of [DynamoDB Streams](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.html), which allowed us to attach event listeners to updates to the database. The remainder of this article outlines the design of the database schema for this service, and how the schema provides us a source of events to drive user searches. + +### Doing It Wrong + +DynamoDB is Amazon's key-value store offering based on the [whitepaper](https://www.allthingsdistributed.com/files/amazon-dynamo-sosp2007.pdf) from 2007. Apache [Cassandra](http://cassandra.apache.org/) is the open-source database that revolves around these same ideas. + +At Mitel, we stored most of our data in PostgreSQL, but our deployment did not yet support [multi-master replication](https://en.wikipedia.org/wiki/Multi-master_replication#PostgreSQL) and was only writeable from our Chicago data center. We used Cassandra to store some classes of critical data which could be written from our Kansas City data center in the event of a DC failover or network partition.
This allowed us to continue processing calls, albeit at a degraded level of service. + +As I was brushing up on the literature for DynamoDB in order to design this new application, I discovered that we were using Cassandra in a **very** wrong way. Despite the consulting from Datastax, we were still using Cassandra like a dumb key-value store. We were doing *some* things correctly (and were well aware to avoid some common issues), but we tended to design our schemas from a mostly-relational perspective: create a table for each entity type, and choose a primary key based on some data we always knew about (a tenant ID in our case) and whatever other values we were likely to search by. This didn't allow us to fetch related entities without subsequent queries, so our code likely suffered from a hand-rolled version of the [n+1 query problem](https://www.sitepoint.com/silver-bullet-n1-problem/). + +The eye-opening resource was [this video](https://youtu.be/HaEPXoXVf2k?t=2962) from AWS re:Invent 2018 on advanced design patterns for DynamoDB (see also, [the slides](https://www.slideshare.net/AmazonWebServices/amazon-dynamodb-deep-dive-advanced-design-patterns-for-dynamodb-dat401-aws-reinvent-2018pdf)). In this video, Rick Houlihan suggests that [a single table](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-general-nosql-design.html#bp-general-nosql-design-concepts) should be enough for the vast majority of applications. + +*I instinctively called bullshit.* This chief technologist **clearly** doesn't know what he's talking about. It turns out that statement is correct, despite how freakishly un-intuitive it seems at first glance. It took me about a half-dozen re-watches of the video before it started to click. The magic concept that broke my relational design bias was [index overloading](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-gsi-overloading.html).
This concept can be followed tersely by simply **not caring what your table column names are**. Once you accept this possibility into your heart, you really can fit all the data into a single table, and you really can design a schema for which *join-like* queries are freakishly efficient. + +### The Database Schema + +There is only one concept you really need to know about DynamoDB in order to understand the following schema: when querying for rows you **must** supply a [partition key](https://aws.amazon.com/blogs/database/choosing-the-right-dynamodb-partition-key/) value (for an equivalence comparison), and **can** supply a [sort key](https://aws.amazon.com/blogs/database/using-sort-keys-to-organize-data-in-amazon-dynamodb/) value (for an equivalence, numeric, range, or prefix comparison). That's really the only way you can fetch data (without getting into [post-fetch filtering](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.FilterExpression)). + +Each table in DynamoDB has exactly one partition key and up to one sort key that constitutes its *primary key*. A table can be configured with a handful of [Global Secondary Indexes](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.html), which essentially creates additional primary keys on the table, which can be queried in the same manner. Behind the scenes, a write to a table with a GSI will transactionally write a projection of the same data to a different set of replicas, which geometrically increases the cost of writes to the table. + +**Let's get into it.** + +The following is a small set of sample data of the Manhunt DynamoDB table. It defines a partition key, a sort key, and two GSIs. The remaining attributes are un-indexed (they can't be used to query a row, but will be returned with the row and can also be used to filter data) and can be thought of as unstructured JSON data.
+ +In each row of this visualization there is an emphasized **field name** along with the `value`. The field name is how the value is interpreted once it comes back from the database, despite being stored by the generic keys `PK`, `SK`, `GSI1PK`, etc. Notice that the meanings of these values are overloaded for the same column in different rows: the partition key value for row 1 is a search identifier, but in row 2 it is a contact identifier instead. Assigning application-specific meaning to these columns on a row-by-row basis is the trick for fitting your application data into a single table. + +{{< lightbox src="/images/schema-design/manhunt-schema.png" anchor="manhunt-schema" >}} + +This table shows four kinds of entities: *contact* (rows 5 and 6), *contactMethod* (rows 2, 3, and 7), *search* (rows 9 and 10), and *searchAttempt* (rows 1, 4, and 8). Additional fields for each entity are not shown for brevity. A contact method row has additional un-indexed fields to denote the contact method type, the destination, the number of seconds to wait for attempting to use this method, and an enabled/disabled flag. A search row has additional un-indexed fields for the search metadata and data about the search's resolution or cancellation. A search attempt row has additional un-indexed fields for the denormalized contact method data used for the attempt, the timestamp of the attempt, and whether/how the user responded. + +Additionally, each row has an `entityType` field (omitted here) that holds the name of the entity the row represents. This is used to ensure application consistency on queries so that a 404 can be returned when requesting a search with the id of a contact entity, instead of failing to deserialize the row into an incompatible struct. This is not strictly necessary, but is a low-cost solution to making API endpoints a bit more ergonomic in these edge cases.
+ +{{< lightbox src="/images/schema-design/manhunt-schema-relations.png" anchor="manhunt-schema-relations" >}} + +Each row has a very limited number of *slots* to put values by which the row can be queried. In order to discuss which values were chosen, let's take a deeper look into row 10, along with a few of the rows that share the same field and value pairs. Row 10 represents a search entity for the contact represented by row 5. Rows 1 and 4 represent an attempt of the search. + +One schema design trick that I've ended up using frequently is to group rows hierarchically. The value of the partition key is the same for a *parent* entity as it is for its *children* (in this application, searches and search attempts, or contacts and contact methods). This works well for entity relations that would be a [one-to-many relationship](https://en.wikipedia.org/wiki/One-to-many_(data_model)) in SQL parlance. The partition key for each of these rows is the unique identifier of the parent entity, and the sort key for each of the rows representing child entities is the unique identifier of the child entity. This leaves us with a choice for the sort key on the parent entity. In this application I chose to use a canned string representing the entity type (with the values `search` or `contact`, which happens to also be the value of the row's `entityType` field). + +This schema makes it easy to query a particular top-level entity by its ID, as you also need to know the exact sort key for such queries (query where **PK** = $sid$ and **SK** = `'search'`). This also makes it easy to get a top-level entity along with all of its children (query where **PK** = $sid$). This second type of query takes a small amount of work on the application side in order to decode each row as either a parent, or one of several child types.
In this application, we have a distinct child entity for each parent, but in the Reflex application, we had parent entities with many more children (incident entities owned a set of resources, a set of searches, a set of attached users, and a set of reaction instances). This is another benefit of tracking an entity type in the row, regardless of the existence of other fields: as this field exists for all rows, it's a safe thing to read before deserializing the remaining fields and tells us into which struct it can be deserialized. + +Using this schema, you cannot query for *all* searches from the primary key without a table scan. We can, however, get all searches for a particular contact, given the contact entity's identifier -- this just requires us to query with the first global secondary index instead of the primary key. If there were other entity types which also encoded a contact identifier into the **GSI1PK** field, we could use the same entityType trick for filtering and safe deserialization. Similarly, we can get contacts for a particular domain (which is how the application organizes multi-tenancy) by querying the same global secondary index with a domain identifier. + +We maintain a second global secondary index that is similar to the first global secondary index for search rows. The only difference is that the *status* of the search is prepended to the contact identifier. This allows us to query for all *active* searches for a particular contact (or canceled, successful, or failed searches) by querying a concatenation of the target search status and the target contact identifier. These two indexes could be reasonably collapsed into one (saving some space and some insertion time), but it was important for our application to be able to have a stable ordering of searches for a contact with a particular status and of searches for a contact regardless of its status.
Using only the first global secondary index for this task would mean we would have to filter the searches by status post-fetch, which is far less efficient when the number of search records for a user are high. Using only the second global secondary index for this task would get us closer, but would require much more logic on the application side, as we would have to perform a [scatter-gather](https://aws.amazon.com/blogs/big-data/scaling-writes-on-amazon-dynamodb-tables-with-global-secondary-indexes/) query and combine the results with the correct sort order. Without much more complicated logic, an additional page of results may seem to go back in time from the user's perspective. This situation occurs with searches $s_1$, $s_2$, and $s_3$ where $s_1$ and $s_2$ are returned on the previous page, $s_2$ and $s_3$ have different statuses, and $s_1 < s_3 < s_2$. + +### Possible Queries + +Given the schema above, the following queries are possible. + + + +
Key ConditionsDescription
+ +**PK** = $sid$ + + + +**SK** = `'search'` + + + +Get a search by id $sid$. + +
+ +**PK** = $sid$ + + + +Get a search by id $sid$ along with its related attempts. + +
+ +**PK** = $cid$ + + + +**SK** = `'contact'` + + + +Get a contact by id $cid$. + +
+ +**PK** = $cid$ + + + +Get a contact by id $cid$ along with its contact methods. + +
+ +**GSI1PK** = $did$ + + + +Get all contacts belonging to a domain by id $did$. Results are ordered alphabetically by name. + +
+ +**GSI1PK** = $cid$ + + + +Get all searches for a contact by id $cid$. Results are ordered chronologically. + +
+ +**GSI1PK** = $cid$ + + + +**GSI1SK** < $d$ + + + +Get all searches for a contact by id $cid$ before a particular date $d$. Alternatively, get all records after a particular date by changing the sort key comparison operator. + +
+ +**GSI2PK** = $s$ `#` $cid$ + + + +Get all searches with a particular status $s$ for a contact by id $cid$. Results are ordered chronologically. + +
+ +**GSI2PK** = $s$ `#` $cid$ + + + +**GSI1SK** < $d$ + + + +Get all searches with a particular status $s$ for a contact by id $cid$ before a particular date $d$. Alternatively, get all records after a particular date by changing the sort key comparison operator. + +
+ +Each of these queries are **extremely** efficient. Because each query requires the partition key in full, and the partition key determines what node data occurs on, each query touches at most one node where the entire result is located. If a sort-key is provided, all matching results can be given in log-linear time as the data on that node is ordered by the sort key. Each query costs at most $\mathcal{O}(log_2(n) + s)$, where $n$ is the number of rows stored on the node and $s$ is the number of matching rows. + +If I were to design this same application a year prior, I would have most likely come up with a sub-optimal and multi-table solution where fetching a search with $n$ attempts would have taken one query from the *searches* table and another $n$ queries from the *search_attempts* table. + +I'm glad I've improved. + +### Event Processing with DynamoDB Streams + +TODO + + + + + + + + + + + + + + + + +package handlers + +import ( + "fmt" + + "github.com/aws/aws-lambda-go/events" + "github.com/aws/aws-sdk-go/service/sqs/sqsiface" + "github.com/efritz/nacelle" + "github.com/mitel-networks/go-aws-lib/golib/handlerutil" + "github.com/mitel-networks/reflex/internal/manhunt/db" +) + +type DynamoDBHandler struct { + Logger nacelle.Logger `service:"logger"` + DB db.DatabaseManager `service:"db"` + SQSAPI sqsiface.SQSAPI `service:"sqs"` + handlerFuncs map[string]handlerutil.DynamoDBHandlerFunc + queueURL string +} + +func NewDynamoDBHandler() handlerutil.Handler { + return handlerutil.NewBaseDynamoDBHandler(&DynamoDBHandler{}) +} + +func (h *DynamoDBHandler) Init(config nacelle.Config) error { + handlerConfig := &DynamoDBHandlerConfig{} + if err := config.Load(handlerConfig); err != nil { + return err + } + + h.handlerFuncs = map[string]handlerutil.DynamoDBHandlerFunc{ + "search#INSERT": h.handleNewSearch, + } + + h.queueURL = handlerConfig.ManhuntQueueURL + return nil +} + +func (h *DynamoDBHandler) GetHandlerFuncFor(recordHash string) handlerutil.DynamoDBHandlerFunc 
{ + if handlerFunc, ok := h.handlerFuncs[recordHash]; ok { + return handlerFunc + } + + return nil +} + +func (h *DynamoDBHandler) handleNewSearch(record events.DynamoDBEventRecord, logger nacelle.Logger) error { + logger.Debug("Constructing search from image") + + search, err := db.SearchFromImage(record.Change.NewImage) + if err != nil { + return err + } + + logger.Info("Handling new search %s", search.SearchID) + + contact, err := h.getContact(search.DomainID, search.ContactID, logger) + if err != nil { + return err + } + + return queueNext( + h.SQSAPI, + h.queueURL, + search, + contact.ContactMethods, + -1, + h.Logger, + ) +} + +func (h *DynamoDBHandler) getContact(domainID, contactID string, logger nacelle.Logger) (*db.FullContact, error) { + logger.Debug("Retrieving contact %s", contactID) + + contact, err := h.DB.GetContact(domainID, contactID) + if err != nil { + return nil, fmt.Errorf("failed to retreive contact (%s)", err.Error()) + } + + return contact, nil +} diff --git a/static/assets/images/schema-design/deposition-schema.png b/static/assets/images/schema-design/deposition-schema.png new file mode 100644 index 0000000..33e92dd Binary files /dev/null and b/static/assets/images/schema-design/deposition-schema.png differ diff --git a/static/assets/images/schema-design/lsif-schema.png b/static/assets/images/schema-design/lsif-schema.png new file mode 100644 index 0000000..ebb1287 Binary files /dev/null and b/static/assets/images/schema-design/lsif-schema.png differ diff --git a/static/assets/images/schemas/manhunt-schema-relations.png b/static/assets/images/schema-design/manhunt-schema-relations.png similarity index 100% rename from static/assets/images/schemas/manhunt-schema-relations.png rename to static/assets/images/schema-design/manhunt-schema-relations.png diff --git a/static/assets/images/schemas/manhunt-schema.png b/static/assets/images/schema-design/manhunt-schema.png similarity index 100% rename from 
static/assets/images/schemas/manhunt-schema.png rename to static/assets/images/schema-design/manhunt-schema.png diff --git a/static/assets/images/schema-design/sources/deposition.tex b/static/assets/images/schema-design/sources/deposition.tex new file mode 100644 index 0000000..46afeca --- /dev/null +++ b/static/assets/images/schema-design/sources/deposition.tex @@ -0,0 +1,142 @@ +\documentclass[tikz,border=5pt]{standalone} +\usepackage{xcolor} +\usetikzlibrary{calc,positioning,shapes.multipart,shapes} + +\definecolor{cA}{HTML}{646FA9} +\definecolor{cB}{HTML}{49A9B4} +\definecolor{cC}{HTML}{6BCD69} +\definecolor{cD}{HTML}{9B9C78} +\definecolor{cE}{HTML}{CBA18C} +\definecolor{cF}{HTML}{C4737C} +\definecolor{cG}{HTML}{DBDBDB} +\definecolor{cX}{HTML}{6A0DAD} + +\begin{document} +\tikzset{basic/.style={ + draw, + rectangle split, + rectangle split parts=2, + minimum width=8cm, + text width=7cm, + align=left, + font=\ttfamily + } +} +\begin{tikzpicture} + \node[basic, rectangle split part fill={cA,white}] (products) { + products + + \nodepart{second} + name: char \hfill PK \\ + team\_id: integer \hfill PK \\ + % + \color{cX} + active: boolean \\ + active\_flagged: boolean \\ + deploy\_flagged: boolean \\ + deployed: boolean \\ + flagged: boolean + }; + + \node[basic,rectangle split part fill={cB,white},below=2cm of products] (builds) { + builds + + \nodepart{second} + version: char \hfill PK \\ + build\_token: char \hfill PK \\ + \color{cA} + product\_name: char \hfill PK \\ + product\_team\_id: integer \hfill PK \\ + \color{black} + phantom: boolean \\ + build\_datetime: timestamp \\ + triggered\_by\_user\_id: integer \\ + % + \color{cX} + active: boolean \\ + deployed: boolean \\ + flagged: boolean \\ + last\_deploy\_datetime: timestamp + }; + + \node[basic,rectangle split part fill={cE,white},below=2cm of builds] (build_deployments) { + build\_deployments + + \nodepart{second} + deployment\_token: char \hfill PK \\ + deploy\_datetime: timestamp \\ + \color{cB} + 
build\_version: char \\ + build\_token: char \\ + build\_product\_name: char \\ + build\_team\_id: integer \\ + \color{black} + triggered\_by\_user\_id: integer + }; + + \node[basic,rectangle split part fill={cF,white},right=2cm of builds] (build_dependency_versions) { + build\_dependency\_versions + + \nodepart{second} + \color{cB} + build\_version: char \hfill PK \\ + build\_token: char \hfill PK \\ + build\_product\_name: char \hfill PK \\ + build\_team\_id: integer \hfill PK \\ + \color{cD} + dependency\_version\_version: char \hfill PK \\ + dependency\_version\_name: char \hfill PK \\ + dependency\_version\_source: char \hfill PK + }; + + \node[basic,rectangle split part fill={cD,white},right=2cm of build_dependency_versions] (dependency_versions) { + dependency\_versions + + \nodepart{second} + version: char \hfill PK \\ + \color{cC} + dependency\_name: char \hfill PK \\ + dependency\_source: char \hfill PK \\ + \color{black} + lookup\_hash: char + }; + + \node[basic,rectangle split part fill={cC,white},above=2cm of dependency_versions] (dependencies) { + dependencies + + \nodepart{second} + name: char \hfill PK \\ + source: char \hfill PK \\ + lookup\_hash: char \\ + % + \color{cX} + flagged: boolean + }; + + \node[basic,rectangle split part fill={cG,white},below=2cm of dependency_versions] (dependency_version_flags) { + dependency\_version\_flags + + \nodepart{second} + depencency\_version\_flag\_id: integer \hfill PK \\ + description: char \\ + apply\_globally: boolean \\ + flag\_datetime: timestamp \\ + dependency\_version\_flag\_id: integer \\ + \color{cD} + dependency\_version\_version: char \\ + dependency\_version\_name: char \\ + dependency\_version\_source: char \\ + \color{black} + team\_id: integer \\ + reported\_by\_user\_id: integer + }; + + + \draw[-latex] ([yshift=0pt]$(builds.north)$) -- ([yshift=0pt]$(products.south)$); + \draw[-latex] ([yshift=0pt]$(build_deployments.north)$) -- ([yshift=0pt]$(builds.south)$); + \draw[-latex] 
([yshift=0pt]$(build_dependency_versions.west)$) -- ([yshift=0pt]$(builds.east)$); + \draw[-latex] ([yshift=0pt]$(build_dependency_versions.east)$) -- ([yshift=0pt]$(dependency_versions.west)$); + \draw[-latex] ([yshift=0pt]$(dependency_version_flags.north)$) -- ([yshift=0pt]$(dependency_versions.south)$); + \draw[-latex] ([yshift=0pt]$(dependency_versions.north)$) -- ([yshift=0pt]$(dependencies.south)$); +\end{tikzpicture} +\end{document} diff --git a/static/assets/images/schema-design/sources/lsif.tex b/static/assets/images/schema-design/sources/lsif.tex new file mode 100644 index 0000000..d8bde10 --- /dev/null +++ b/static/assets/images/schema-design/sources/lsif.tex @@ -0,0 +1,270 @@ + \documentclass[tikz,border=5pt]{standalone} +\usepackage{xcolor} +\usetikzlibrary{calc,positioning,shapes.multipart,shapes} +\usepackage{listings} + +\definecolor{cA}{HTML}{646FA9} +\definecolor{cB}{HTML}{49A9B4} +\definecolor{cC}{HTML}{6BCD69} +\definecolor{cD}{HTML}{9B9C78} +\definecolor{cE}{HTML}{CBA18C} +\definecolor{cF}{HTML}{C4737C} +\definecolor{cG}{HTML}{DBDBDB} +\definecolor{cX}{HTML}{6A0DAD} + +\colorlet{punct}{red!60!black} +\definecolor{background}{HTML}{EEEEEE} +\definecolor{delim}{RGB}{20,105,176} +\colorlet{numb}{magenta!60!black} + +\lstdefinelanguage{json}{ + basicstyle=\normalfont\ttfamily, + numberstyle=\scriptsize, + stepnumber=1, + numbersep=8pt, + showstringspaces=false, + breaklines=true, + literate= + *{0}{{{\color{numb}0}}}{1} + {1}{{{\color{numb}1}}}{1} + {2}{{{\color{numb}2}}}{1} + {3}{{{\color{numb}3}}}{1} + {4}{{{\color{numb}4}}}{1} + {5}{{{\color{numb}5}}}{1} + {6}{{{\color{numb}6}}}{1} + {7}{{{\color{numb}7}}}{1} + {8}{{{\color{numb}8}}}{1} + {9}{{{\color{numb}9}}}{1} + {:}{{{\color{punct}{:}}}}{1} + {,}{{{\color{punct}{,}}}}{1} + {\{}{{{\color{delim}{\{}}}}{1} + {\}}{{{\color{delim}{\}}}}}{1} + {[}{{{\color{delim}{[}}}}{1} + {]}{{{\color{delim}{]}}}}{1}, +} + +\begin{document} +\tikzset{basic/.style={ + draw, + rectangle split, + rectangle split 
parts=2, + minimum width=8cm, + text width=7cm, + align=left, + font=\ttfamily + } +} +\tikzset{json/.style={ + draw, + dashed, + thick, + rectangle, + minimum width=8cm, + text width=7cm, + align=left + } +} +\begin{tikzpicture} + \node[basic, rectangle split part fill={cA,white}] (dumps) { + dumps + + \nodepart{second} + id: integer \hfill PK \\ + repository: char \\ + commit: char \\ + root: char \\ + visible\_at\_tip: boolean \\ + processed\_at: timestamp \\ + uploaded\_at: timestamp + }; + + \node[basic,rectangle split part fill={cB,white},left=2cm of dumps] (packages) { + packages + + \nodepart{second} + id: integer \hfill PK \\ + scheme: char \\ + name: char \\ + version: char \\ + \color{cA} + dump\_id: integer + }; + + \node[basic,rectangle split part fill={cC,white},right=2cm of dumps] (references) { + references + + \nodepart{second} + id: integer \hfill PK \\ + scheme: char \\ + name: char \\ + version: char \\ + filter: blob \\ + \color{cA} + dump\_id: integer + }; + + + \node[basic, rectangle split part fill={cD,white},below=3cm of dumps, xshift=-1cm] (documents) { + documents + + \nodepart{second} + path: char \hfill PK \\ + data: blob \\ + }; + + \node[basic, rectangle split part fill={cG,white},below=2cm of documents, xshift=1cm] (resultChunks) { + resultChunks + + \nodepart{second} + id: integer \hfill PK \\ + data: blob \\ + }; + + \node[basic,rectangle split part fill={cE,white},left=1cm of documents] (definitions) { + definitions + + \nodepart{second} + id: integer \hfill PK \\ + scheme: char \\ + identifier: char \\ + \color{cA} + documentPath: char \\ + \color{black} + startLine: integer \\ + endLine: integer \\ + startCharacter: integer \\ + endCharacter: integer + }; + + \node[basic,rectangle split part fill={cF,white},right=3cm of documents] (sqliteReferences) { + references + + \nodepart{second} + id: integer \hfill PK \\ + scheme: char \\ + identifier: char \\ + \color{cA} + documentPath: char \\ + \color{black} + startLine: integer \\ + 
endLine: integer \\ + startCharacter: integer \\ + endCharacter: integer + }; + + \node[json,align=left, font=\ttfamily, below=3cm of definitions] (documentsSchema) { + \begin{lstlisting}[language=json] +{ + "ranges": { + 4: { + "startLine": 12, + "endLine": 12, + "startCharacter": 3, + "endCharacter": 6, + "definitionResultId": 5, + "referenceResultId": 6, + "hoverResultId": 7, + "monikerIds": [8], + }, + }, + "hoverResults": { + 7: "hover text", + }, + "monikers": { + 8: { + "kind": "export", + "scheme": "npm", + "identifier": "foo:bar", + "packageId": 9, + }, + }, + "packages": { + 9: { + "name": "foo", + "version": "0.1.0", + }, + } +} + \end{lstlisting} + }; + + \node[json,align=left, font=\ttfamily, below=3cm of sqliteReferences] (resultChunkSchema) { + \begin{lstlisting}[language=json] +{ + "paths": { + 1: "file.ext", + }, + "results": { + 3: [ + { + "documentId": 1, + "rangeId": 4, + } + ], + 5: ..., + 6: ..., + }, +} + \end{lstlisting} + }; + + % Postgres + \node[rectangle, left=0.75cm of packages]{\includegraphics[width=2cm]{postgres.png}}; + \node[rectangle, left=0.75cm of definitions]{\includegraphics[width=2cm]{sqlite.png}}; + + % SQLite + \draw[dashed, thick] ($(definitions.north west)+(-0.25cm,0.25cm)$) rectangle ($(resultChunkSchema.south east)+(0.25cm,-7.125cm)$); + \draw[-latex, very thick] (dumps.south) -- ($(dumps.south)+(0cm,-1.4cm)$); + + % Relationships + \draw[-latex] ([yshift=0pt]$(references.west)$) -- ([yshift=0pt]$(dumps.east)$); + \draw[-latex] ([yshift=0pt]$(packages.east)$) -- ([yshift=0pt]$(dumps.west)$); + \draw[-latex] ([yshift=0pt]$(definitions.east)$) -- ([yshift=0pt]$(documents.west)$); + \draw[-latex] ([yshift=0pt]$(sqliteReferences.west)$) -- ([yshift=0pt]$(documents.east)$); + + % Document schema + \draw[dashed, thick] ($(documents.north west)+(+.25cm,-0.9cm)$) rectangle ($(documents.north west)+(+2.5cm,-1.5cm)$) {}; + \draw[-, dashed] ($(documents.north west)+(+.25cm,-0.9cm)$) -- (documentsSchema.north east); + \draw[-, 
dashed] ($(documents.north west)+(+.25cm,-1.5cm)$) -- (documentsSchema.south east); + + % Result chunks schema + \draw[dashed, thick] ($(resultChunks.north west)+(+.25cm,-0.9cm)$) rectangle ($(resultChunks.north west)+(+2.5cm,-1.5cm)$) {}; + \draw[-, dashed] ($(resultChunks.north west)+(+2.5cm,-0.9cm)$) -- (resultChunkSchema.north west); + \draw[-, dashed] ($(resultChunks.north west)+(+2.5cm,-1.5cm)$) -- (resultChunkSchema.south west); + + % Path relationship + \draw[fill=cD, opacity=0.25, dashed] ($(resultChunkSchema.north west)+(+2cm,-1.3cm)$) rectangle ($(resultChunkSchema.north west)+(+4.3cm,-1.7cm)$) {}; + \draw[-latex] ($(resultChunkSchema.north west)+(+3.5cm,-1.3cm)$) to[out=90, in=-45] ($(documents.south east)+(-0.0cm,0cm)$); + \draw[fill=cD, opacity=0.25, dashed] ($(resultChunkSchema.north west)+(+5.25cm,-3.4cm)$) rectangle ($(resultChunkSchema.north west)+(+5.75cm,-3.85cm)$) {}; + \draw[-latex] ($(resultChunkSchema.north west)+(+5.5cm,-3.4cm)$) to[out=90, in=-90] ($(resultChunkSchema.north west)+(+3.75cm,-1.7cm)$); + + % Range relationship + \draw[fill=cA, opacity=0.25, dashed] ($(documentsSchema.north west)+(+1.8cm,-1.7cm)$) rectangle ($(documentsSchema.north west)+(+7.25cm,-5.1cm)$) {}; + \draw[fill=cA, opacity=0.25, dashed] ($(resultChunkSchema.north west)+(+2.25cm,-3.8cm)$) rectangle ($(resultChunkSchema.north west)+(+5.125cm,-4.3cm)$) {}; + \draw[-latex] ($(resultChunkSchema.north west)+(+2.25cm,-4cm)$) -- ($(documentsSchema.north west)+(+7.25cm,-2.3cm)$); + + % Hover relationship + \draw[fill=cB, opacity=0.25, dashed] ($(documentsSchema.north west)+(+1.8cm,-6.35cm)$) rectangle ($(documentsSchema.north west)+(+7.25cm,-6.8cm)$) {}; + \draw[fill=cB, opacity=0.25, dashed] ($(documentsSchema.north west)+(+5.5cm,-4.25cm)$) rectangle ($(documentsSchema.north west)+(+6cm,-4.7cm)$) {}; + \draw[-latex] ($(documentsSchema.north west)+(+6cm,-4.45cm)$) to[out=0, in=90] ($(documentsSchema.north west)+(+6.8cm,-6.35cm)$); + + % Moniker relationship + \draw[fill=cC, 
opacity=0.25, dashed] ($(documentsSchema.north west)+(+1.8cm,-8cm)$) rectangle ($(documentsSchema.north west)+(+7.25cm,-9.75cm)$) {}; + \draw[fill=cC, opacity=0.25, dashed] ($(documentsSchema.north west)+(+5cm,-5.15cm)$) rectangle ($(documentsSchema.north west)+(+5.55cm,-4.65cm)$) {}; + \draw[-latex] ($(documentsSchema.north west)+(+5.25cm,-5.15cm)$) to[out=-90, in=90] ($(documentsSchema.north west)+(+5.8cm,-8cm)$); + + % Package relationship + \draw[fill=cD, opacity=0.25, dashed] ($(documentsSchema.north west)+(+1.8cm,-11.4cm)$) rectangle ($(documentsSchema.north west)+(+7.25cm,-12.3cm)$) {}; + \draw[fill=cD, opacity=0.25, dashed] ($(documentsSchema.north west)+(+4.6cm,-9.8cm)$) rectangle ($(documentsSchema.north west)+(+5.2cm,-9.3cm)$) {}; + \draw[-latex] ($(documentsSchema.north west)+(+5.2cm,-9.5cm)$) to[out=0, in=90] ($(documentsSchema.north west)+(+6.5cm,-11.4cm)$); + + % Results relationship + \draw[fill=cE, opacity=0.25, dashed] ($(resultChunkSchema.north west)+(+1.25cm,-5.1cm)$) rectangle ($(resultChunkSchema.north west)+(+3cm,-5.52cm)$) {}; + \draw[fill=cE, opacity=0.25, dashed] ($(documentsSchema.north west)+(+6.55cm,-3.375cm)$) rectangle ($(documentsSchema.north west)+(+7.125cm,-3.85cm)$) {}; + \draw[-latex] ($(documentsSchema.north west)+(+7.125cm,-3.6cm)$) -- ($(resultChunkSchema.north west)+(+1.25cm,-5.3cm)$); + % + \draw[fill=cF, opacity=0.25,dashed] ($(resultChunkSchema.north west)+(+1.25cm,-5.52cm)$) rectangle ($(resultChunkSchema.north west)+(+3cm,-5.95cm)$) {}; + \draw[fill=cF, opacity=0.25, dashed] ($(documentsSchema.north west)+(+6.35cm,-3.775cm)$) rectangle ($(documentsSchema.north west)+(+6.9cm,-4.25cm)$) {}; + \draw[-latex] ($(documentsSchema.north west)+(+6.9cm,-4cm)$) -- ($(resultChunkSchema.north west)+(+1.25cm,-5.75cm)$); +\end{tikzpicture} +\end{document} \ No newline at end of file diff --git a/static/assets/images/schema-design/sources/manhunt.tex b/static/assets/images/schema-design/sources/manhunt.tex new file mode 100644 index 
0000000..b741c78 --- /dev/null +++ b/static/assets/images/schema-design/sources/manhunt.tex @@ -0,0 +1,103 @@ +\documentclass[border={20pt 20pt 20pt 20pt}]{standalone} +\usepackage{xcolor} +\usepackage{tikz} + +\definecolor{cA}{HTML}{646FA9} +\definecolor{cB}{HTML}{49A9B4} +\definecolor{cC}{HTML}{6BCD69} +\definecolor{cD}{HTML}{9B9C78} +\definecolor{cE}{HTML}{CBA18C} +\definecolor{cF}{HTML}{C4737C} +\definecolor{cG}{HTML}{DBDBDB} +\definecolor{cX}{HTML}{6A0DAD} + +\def\arraystretch{1.7} + +\begin{document} + +\begin{tikzpicture} +\node (x) { +\begin{tabular}{| c | c | c | c | c | c | c | c |} +\hline +& \textbf{PK} & \textbf{SK} & \textbf{GSI1PK} & \textbf{GSI1SK} & \textbf{GSI2PK} & \textbf{GSI2SK} & \textbf{Unindexed Attributes (Subset)} \\\hline +% +% +1 & \def\arraystretch{1}\begin{tabular}{c}\textbf{searchId} \\ \color{cA}\texttt{2ad6a19}\end{tabular} & \def\arraystretch{1}\begin{tabular}{c}\textbf{attemptId} \\ \texttt{384f7b1}\end{tabular} & & & & & +% +\def\arraystretch{1}\begin{tabular}{c | c} +\textbf{contactId} & \textbf{domainId} \\ +\color{cC}\texttt{9d657f8} & \color{cE}\texttt{6f7ad1b} +\end{tabular} \\\hline +% +2 & \def\arraystretch{1}\begin{tabular}{c}\textbf{contactId} \\ \color{cB}\texttt{89fa8f8}\end{tabular} & \def\arraystretch{1}\begin{tabular}{c}\textbf{methodId} \\ \texttt{538235b}\end{tabular} & & & & & +% +\def\arraystretch{1}\begin{tabular}{c} +\textbf{domainId} \\ \color{cF}\texttt{db3280c} +\end{tabular} \\\hline +% +3 & \def\arraystretch{1}\begin{tabular}{c}\textbf{contactId} \\ \color{cB}\texttt{89fa8f8}\end{tabular} & \def\arraystretch{1}\begin{tabular}{c}\textbf{methodId} \\ \texttt{800e613}\end{tabular} & & & & & +% +\def\arraystretch{1}\begin{tabular}{c} +\textbf{domainId} \\ \color{cF}\texttt{db3280c} +\end{tabular} \\\hline +% +4 & \def\arraystretch{1}\begin{tabular}{c}\textbf{searchId} \\ \color{cA}\texttt{2ad6a19}\end{tabular} & \def\arraystretch{1}\begin{tabular}{c}\textbf{attemptId} \\ \texttt{8cbc75c}\end{tabular} & & & & 
& +% +\def\arraystretch{1}\begin{tabular}{c | c} +\textbf{contactId} & \textbf{domainId} \\ +\color{cC}\texttt{9d657f8} & \color{cE}\texttt{6f7ad1b} +\end{tabular} \\\hline +% +5 & \def\arraystretch{1}\begin{tabular}{c}\textbf{contactId} \\ \color{cC}\texttt{9d657f8}\end{tabular} & \texttt{contact} & \def\arraystretch{1}\begin{tabular}{c}\textbf{domainId} \\ \color{cE}\texttt{6f7ad1b}\end{tabular} & \def\arraystretch{1}\begin{tabular}{c}\textbf{name} \\ \texttt{Arian Mellor}\end{tabular} & & & \\\hline +% +6 & \def\arraystretch{1}\begin{tabular}{c}\textbf{contactId} \\ \color{cB}\texttt{89fa8f8}\end{tabular} & \texttt{contact} & \def\arraystretch{1}\begin{tabular}{c}\textbf{domainId} \\ \color{cF}\texttt{db3280c}\end{tabular} & \def\arraystretch{1}\begin{tabular}{c}\textbf{name} \\ \texttt{Zavier Curtis}\end{tabular} & & & \\\hline +% +7 & \def\arraystretch{1}\begin{tabular}{c}\textbf{contactId} \\ \color{cC}\texttt{9d657f8}\end{tabular} & \def\arraystretch{1}\begin{tabular}{c}\textbf{methodId} \\ \texttt{d3fd3a0}\end{tabular} & & & & & +% +\def\arraystretch{1}\begin{tabular}{c} +\textbf{domainId} \\ \color{cE}\texttt{6f7ad1b} +\end{tabular} \\\hline +% +8 & \def\arraystretch{1}\begin{tabular}{c}\textbf{searchId} \\ \color{cD}\texttt{a271244}\end{tabular} & \def\arraystretch{1}\begin{tabular}{c}\textbf{attemptId} \\ \texttt{daf3d11}\end{tabular} & & & & & +% +\def\arraystretch{1}\begin{tabular}{c | c} +\textbf{contactId} & \textbf{domainId} \\ +\color{cB}\texttt{89fa8f8} & \color{cF}\texttt{db3280c} \\ +\end{tabular} \\\hline +% +9 & \def\arraystretch{1}\begin{tabular}{c}\textbf{searchId} \\ \color{cD}\texttt{a271244}\end{tabular} & \texttt{search} & \def\arraystretch{1}\begin{tabular}{c}\textbf{contactId} \\ \color{cB}\texttt{89fa8f8}\end{tabular} & \def\arraystretch{1}\begin{tabular}{c}\textbf{startedDatetime} \\ \texttt{2019-12-30T19:32:14Z}\end{tabular} & \def\arraystretch{1}\begin{tabular}{c}\textbf{status, contactId} \\ 
\texttt{active\#}\color{cB}\texttt{89fa8f8}\end{tabular} & \def\arraystretch{1}\begin{tabular}{c}\textbf{startedDatetime} \\ \texttt{2019-12-30T19:32:14Z}\end{tabular} & +% +\def\arraystretch{1}\begin{tabular}{c} +\textbf{domainId} \\ \color{cF}\texttt{db3280c} +\end{tabular} \\\hline +% +10 & \def\arraystretch{1}\begin{tabular}{c}\textbf{searchId} \\ \color{cA}\texttt{2ad6a19}\end{tabular} & \texttt{search} & \def\arraystretch{1}\begin{tabular}{c}\textbf{contactId} \\ \color{cC}\texttt{9d657f8}\end{tabular} & \def\arraystretch{1}\begin{tabular}{c}\textbf{startedDatetime} \\ \texttt{2019-12-30T12:14:01Z}\end{tabular} & \def\arraystretch{1}\begin{tabular}{c}\textbf{status, contactId} \\ \texttt{canceled\#}\color{cC}\texttt{9d657f8}\end{tabular} & \def\arraystretch{1}\begin{tabular}{c}\textbf{startedDatetime} \\ \texttt{2019-12-30T12:14:01Z}\end{tabular} & +% +\def\arraystretch{1}\begin{tabular}{c} +\textbf{domainId} \\ \color{cE}\texttt{6f7ad1b} +\end{tabular} \\\hline +% +\end{tabular} +}; + +%\draw[line width=0mm, fill=gray!20, fill opacity=0.9] (-12.845, +1.425) rectangle ++(26.46, 1.575); +%\draw[line width=0mm, fill=gray!20, fill opacity=0.9] (-12.845, -3.725) rectangle ++(26.46, 3.35); +% +%\draw[line width=0.5mm, draw=black!30!green] (-12.9, -4.7) rectangle ++(26.575, 0.95); +%\draw[line width=0.5mm, draw=black!30!green] (-12.9, +0.45) rectangle ++(26.575, 0.95); +%\draw[line width=0.5mm, draw=black!30!green] (-12.9, +3.02) rectangle ++(26.575, 0.95); +% +%\draw[line width=0.5mm, draw=black!30!blue] (-12.9, +3.02) rectangle ++(2.6125, 0.95); +%\draw[line width=0.5mm, draw=black!30!blue] (-12.9, +0.45) rectangle ++(2.6125, 0.95); +%\draw[line width=0.5mm, draw=black!30!blue] (-7.75, -4.7) rectangle ++(2.6125, 0.95); +%\draw[line width=0.5mm, draw=black!30!blue] (+9.35, -4.7) rectangle ++(2.6125, 0.95); +% +%\draw[line width=0.5mm, draw=black!30!red] (-12.9, -0.41) rectangle ++(2.6125, 0.86); +%\draw[line width=0.5mm, draw=black!30!red] (-7.75, -0.41) rectangle 
++(2.6125, 0.86); +% +%\draw[-latex ,line width=0.25mm] (-6.5, -3.75) to[out=90, in=-90] (-11.575, -0.41); +%\draw[-latex ,line width=0.25mm] (+10.75, -3.75) to[out=90, in=-90, looseness=0.75] (-6.5, -0.41); +%\draw[-latex, line width=0.25mm] (-12.9, +3.52) to[out=225, in=135] (-12.9, -4.2); +%\draw[-latex, line width=0.25mm] (-12.9, +1.0) to[out=225, in=135] (-12.9, -4.2); +\end{tikzpicture} +\end{document} diff --git a/static/assets/images/schema-design/sources/postgres.png b/static/assets/images/schema-design/sources/postgres.png new file mode 100644 index 0000000..fd9c7ec Binary files /dev/null and b/static/assets/images/schema-design/sources/postgres.png differ diff --git a/static/assets/images/schema-design/sources/sqlite.png b/static/assets/images/schema-design/sources/sqlite.png new file mode 100644 index 0000000..5c11e06 Binary files /dev/null and b/static/assets/images/schema-design/sources/sqlite.png differ diff --git a/static/assets/images/schemas/deposition-schema.png b/static/assets/images/schemas/deposition-schema.png deleted file mode 100644 index 75b294c..0000000 Binary files a/static/assets/images/schemas/deposition-schema.png and /dev/null differ diff --git a/static/assets/images/schemas/lsif-schema.png b/static/assets/images/schemas/lsif-schema.png deleted file mode 100644 index c532924..0000000 Binary files a/static/assets/images/schemas/lsif-schema.png and /dev/null differ